# Download the image-predictions dataset with the requests library and save it
# to disk as a binary file.
url = 'https://d17h27t6h515a5.cloudfront.net/topher/2017/August/599fd2ad_image-predictions/image-predictions.tsv'
# timeout prevents hanging forever on an unresponsive server
response = requests.get(url, timeout=30)
# Fail loudly on HTTP errors instead of silently saving an error page.
response.raise_for_status()
with open('image-predictions.tsv', mode='wb') as file:
    file.write(response.content)
# Query Twitter's API (via Tweepy) for the full JSON of each tweet ID in the
# archive, saving each tweet's JSON as one line of tweet_json.txt.
# Failed lookups (deleted/protected tweets) are collected in failed_IDs.
tweet_idlist = list(df_tweeter.tweet_id)

# NOTE(review): real credentials must never be committed; these are redacted.
consumer_key = '***************'
consumer_secret = '******************************'
access_token = '*********************************************'
access_secret = '*********************************************'

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
# wait_on_rate_limit=True makes Tweepy sleep through rate-limit windows
# instead of raising, so the long loop below can run unattended.
api = tweepy.API(auth, wait_on_rate_limit=True)

count = 0
failed_IDs = []
start = timer()
# Save each tweet's returned JSON as a new line in a .txt file
with open('tweet_json.txt', 'w') as outfile:
    for ids in tweet_idlist:
        count += 1
        print(str(count) + ": " + str(ids))
        try:
            tweet = api.get_status(ids, tweet_mode='extended')
            print("Success")
            json.dump(tweet._json, outfile)
            outfile.write('\n')
        except Exception as e:
            # Narrowly a tweepy error in practice (deleted/suspended tweets);
            # record the ID so the failures can be inspected afterwards.
            print("Failed: " + str(e))
            failed_IDs.append(ids)
end = timer()
print(end - start)